/* Listing 4.  C/C++ Users Group Library, listings/v_10_04/1004028a, 1992-02-18 */
typedef struct {
float X1, X2;
} ARG_F_2; /* vector 2 */
#include "float.h"
#include <errno.h>
#ifndef errno
extern int errno;
#endif
#define MANTBITS (FLT_MANT_DIG - 1) /* 23 mantissa bits for IEEE single */
/* Overlay a float with its raw bits for direct exponent and
** mantissa manipulation */
union fltfmt {
float flt;
int iflt; /* assumes 32-bit int; VAX: must change all this */
struct ffmt {
/* Bitfield allocation order is implementation-defined: this layout
** assumes the compiler fills from the most significant bit, so ex
** holds the sign plus biased exponent and mant the low 23 bits; on
** typical little-endian ABIs, declare mant before ex */
unsigned int ex:9;
unsigned int mant:MANTBITS;
} fmt;
};
ARG_F_2 powf_2(union fltfmt xi1, union fltfmt y1,
union fltfmt xi2, union fltfmt y2)
{
#define max(i,j) ((i)>(j)?(i):(j))
#define min(i,j) ((i)<(j)?(i):(j))
#if FLT_MANT_DIG != 24
#error "use portable frexp() ldexp() */
#endif
#if FLT_ROUNDS == 1
#if defined(__STDC__)
/* This plain form also works on some non-ANSI compilers */
#define ROUND(x) ((x)>=0?( \
(x)+1/LDBL_EPSILON)-1/LDBL_EPSILON: \
((x)-1/LDBL_EPSILON)+1/LDBL_EPSILON)
#else
/* The multiplications by 1 keep pre-ANSI compilers from constant-
** folding the add/subtract pair away */
#define ROUND(x) ((x)>=0?( \
(x)+1/LDBL_EPSILON)*1-1/LDBL_EPSILON: \
((x)-1/LDBL_EPSILON)*1+1/LDBL_EPSILON)
#endif
#else
#define ROUND(x) ((x)>=0?(int)(x+.5):(int)(x-.5))
#endif
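/* Note (added): ROUND(x) rounds to the nearest integer.  Under
** round-to-nearest (FLT_ROUNDS == 1), adding and then subtracting
** the large constant 1/LDBL_EPSILON leaves exactly the nearest
** integer without converting to int; the cast form is the fallback
** for other rounding modes */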
int mi, mi2, msign;
double xr, x2, r, r1;
ARG_F_2 res;
/* Copy 1 */
/* This frexp() operation would be done better after promotion to
** double, but it's not mandatory unless dealing with gradual
** underflow; it would eliminate most cases of 0 and Inf changing
** to finite numbers:
**     if ((xi1.flt = frexp(xi1.flt, &mi)) < sqrt(.5)) {
**         --mi;
**         xi1.flt *= 2;
**     }
*/
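/* Note (added): the next statements extract the unbiased exponent
** from the raw bits and reduce the argument.  0x3504f3 is the 23-bit
** mantissa field of sqrt(.5), so the comparison selects a bias of
** 127 or 126 ((2 - FLT_MIN_EXP) or (1 - FLT_MIN_EXP)), and the
** reduced xi1.flt below lands in [sqrt(.5), sqrt(2)) */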
mi = ((xi1.iflt & 0x7fffffff) -
(mi2 = (xi1.fmt.mant < 0x3504f3 ?
(2 - FLT_MIN_EXP) << MANTBITS :
(1 - FLT_MIN_EXP) << MANTBITS)))
>> MANTBITS;
if (xi1.iflt < 0 | xi2.iflt < 0) /* branchless test: either base < 0 */
errno = EDOM;
xi1.iflt = mi2 | xi1.fmt.mant;
/* Mult by y distributed to increase parallelism */
r1 = (xr = (xi1.flt - 1) / (xi1.flt + 1)) * y1.flt;
x2 = xr * xr;
/* Coefficients determined by Chebyshev fitting;
** double precision is only really needed from here */
r = y1.flt * (double) mi + r1 * (2.8853904 +
x2 * (.5958 * x2 + .961588));
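/* Note (added): r now approximates y1*log2(x): mi supplies the
** integer exponent and 2.8853904 ~= 2/ln(2); the polynomial follows
** the series log2(x) = (2/ln 2)*(xr + xr^3/3 + xr^5/5 + ...) with
** xr = (x-1)/(x+1), the higher coefficients adjusted by the fit
** noted above */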
/* msign = (r -= rint(r)) < 0; ROUND() stands in for rint() */
msign = (r -= r1 = ROUND(r)) < 0;
r *= 125.0718418 + (x2 = r * r);
x2 = 360.8810526 + 17.3342336 * x2;
/* xi1.flt = ldexp((x2+r)/(x2-r), (int)r1) */
xi1.flt = (x2 + r) / (x2 - r);
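/* Note (added): the fractional part is exponentiated with a
** Pade-style rational approximation, 2^r ~= (q + p)/(q - p), where
** p = r*(125.0718418 + r*r) and q = 360.8810526 + 17.3342336*r*r
** (a reading of the constants, not stated in the original) */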
/* Preferably do this ldexp() operation in double; it's slower, even
** though msign can then be eliminated, but it would always give Inf
** rather than NaN and would allow use of gradual underflow */
xi1.iflt += (max(FLT_MIN_EXP - 2 + msign,
min(FLT_MAX_EXP + msign, (int) r1)) << MANTBITS);
/* i.e. xi1.fmt.ex += (int)r1, clamped to prevent exponent wraparound */
res.X1 = xi1.flt;
/* Copy 2: same steps for the second element, with the multiply by y
** associated differently to increase parallelism */
mi = ((xi2.iflt & 0x7fffffff) -
(mi2 = (xi2.fmt.mant < 0x3504f3 ?
(2 - FLT_MIN_EXP) << MANTBITS :
(1 - FLT_MIN_EXP) << MANTBITS)))
>> MANTBITS;
xi2.iflt = mi2 | xi2.fmt.mant;
r1 = (xr = (xi2.flt - 1) / (xi2.flt + 1)) * y2.flt;
r1 *= x2 = xr * xr;
r = y2.flt * ((double) mi + xr * 2.8853904) +
r1 * (.5958 * x2 + .961588);
msign = (r -= r1 = ROUND(r)) < 0;
r *= 125.0718418 + (x2 = r * r);
x2 = 360.8810526 + 17.3342336 * x2;
xi2.flt = (x2 + r) / (x2 - r);
xi2.iflt += (max(FLT_MIN_EXP - 2 + msign,
min(FLT_MAX_EXP + msign, (int) r1)) << MANTBITS);
res.X2 = xi2.flt;
return res;
}
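#ifdef TEST
/* Hypothetical test driver (added, not part of the original
** listing): loads the union arguments and compares the paired
** results against the reference pow().  Assumes 32-bit int,
** IEEE 754 single precision, and the MSB-first bitfield layout
** noted at union fltfmt above. */
#include <stdio.h>
#include <math.h>
int main(void)
{
union fltfmt x1, y1, x2, y2;
ARG_F_2 r;
x1.flt = 2.0f; y1.flt = 10.0f; /* 2^10 = 1024 */
x2.flt = 3.0f; y2.flt = 0.5f; /* 3^0.5 ~= 1.7320508 */
r = powf_2(x1, y1, x2, y2);
printf("powf_2: %.7g %.7g  libm: %.7g %.7g\n",
r.X1, r.X2, pow(2.0, 10.0), pow(3.0, 0.5));
return 0;
}
#endif /* TEST */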